In [36]:
import numpy as np
import pandas as pd
In [37]:
from sklearn.datasets import load_boston
In [38]:
boston = load_boston()
In [39]:
data = pd.DataFrame(boston.data)
In [40]:
data.head()
Out[40]:
0 1 2 3 4 5 6 7 8 9 10 11 12
0 0.00632 18.0 2.31 0.0 0.538 6.575 65.2 4.0900 1.0 296.0 15.3 396.90 4.98
1 0.02731 0.0 7.07 0.0 0.469 6.421 78.9 4.9671 2.0 242.0 17.8 396.90 9.14
2 0.02729 0.0 7.07 0.0 0.469 7.185 61.1 4.9671 2.0 242.0 17.8 392.83 4.03
3 0.03237 0.0 2.18 0.0 0.458 6.998 45.8 6.0622 3.0 222.0 18.7 394.63 2.94
4 0.06905 0.0 2.18 0.0 0.458 7.147 54.2 6.0622 3.0 222.0 18.7 396.90 5.33
In [41]:
data.columns = boston.feature_names
In [42]:
data['PRICE'] = boston.target
In [43]:
data.head(n=10)
Out[43]:
CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT PRICE
0 0.00632 18.0 2.31 0.0 0.538 6.575 65.2 4.0900 1.0 296.0 15.3 396.90 4.98 24.0
1 0.02731 0.0 7.07 0.0 0.469 6.421 78.9 4.9671 2.0 242.0 17.8 396.90 9.14 21.6
2 0.02729 0.0 7.07 0.0 0.469 7.185 61.1 4.9671 2.0 242.0 17.8 392.83 4.03 34.7
3 0.03237 0.0 2.18 0.0 0.458 6.998 45.8 6.0622 3.0 222.0 18.7 394.63 2.94 33.4
4 0.06905 0.0 2.18 0.0 0.458 7.147 54.2 6.0622 3.0 222.0 18.7 396.90 5.33 36.2
5 0.02985 0.0 2.18 0.0 0.458 6.430 58.7 6.0622 3.0 222.0 18.7 394.12 5.21 28.7
6 0.08829 12.5 7.87 0.0 0.524 6.012 66.6 5.5605 5.0 311.0 15.2 395.60 12.43 22.9
7 0.14455 12.5 7.87 0.0 0.524 6.172 96.1 5.9505 5.0 311.0 15.2 396.90 19.15 27.1
8 0.21124 12.5 7.87 0.0 0.524 5.631 100.0 6.0821 5.0 311.0 15.2 386.63 29.93 16.5
9 0.17004 12.5 7.87 0.0 0.524 6.004 85.9 6.5921 5.0 311.0 15.2 386.71 17.10 18.9
In [44]:
print(data.shape)
(506, 14)
In [45]:
data.describe()
Out[45]:
CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT PRICE
count 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000
mean 3.613524 11.363636 11.136779 0.069170 0.554695 6.284634 68.574901 3.795043 9.549407 408.237154 18.455534 356.674032 12.653063 22.532806
std 8.601545 23.322453 6.860353 0.253994 0.115878 0.702617 28.148861 2.105710 8.707259 168.537116 2.164946 91.294864 7.141062 9.197104
min 0.006320 0.000000 0.460000 0.000000 0.385000 3.561000 2.900000 1.129600 1.000000 187.000000 12.600000 0.320000 1.730000 5.000000
25% 0.082045 0.000000 5.190000 0.000000 0.449000 5.885500 45.025000 2.100175 4.000000 279.000000 17.400000 375.377500 6.950000 17.025000
50% 0.256510 0.000000 9.690000 0.000000 0.538000 6.208500 77.500000 3.207450 5.000000 330.000000 19.050000 391.440000 11.360000 21.200000
75% 3.677083 12.500000 18.100000 0.000000 0.624000 6.623500 94.075000 5.188425 24.000000 666.000000 20.200000 396.225000 16.955000 25.000000
max 88.976200 100.000000 27.740000 1.000000 0.871000 8.780000 100.000000 12.126500 24.000000 711.000000 22.000000 396.900000 37.970000 50.000000
In [46]:
data.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 14 columns):
 #   Column   Non-Null Count  Dtype  
---  ------   --------------  -----  
 0   CRIM     506 non-null    float64
 1   ZN       506 non-null    float64
 2   INDUS    506 non-null    float64
 3   CHAS     506 non-null    float64
 4   NOX      506 non-null    float64
 5   RM       506 non-null    float64
 6   AGE      506 non-null    float64
 7   DIS      506 non-null    float64
 8   RAD      506 non-null    float64
 9   TAX      506 non-null    float64
 10  PTRATIO  506 non-null    float64
 11  B        506 non-null    float64
 12  LSTAT    506 non-null    float64
 13  PRICE    506 non-null    float64
dtypes: float64(14)
memory usage: 55.5 KB
In [47]:
import seaborn as sns
In [48]:
sns.distplot(data.PRICE)
Out[48]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fa660588190>
In [49]:
sns.boxplot(data.PRICE)
Out[49]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fa643927550>
In [50]:
correlation = data.corr()
correlation.loc['PRICE']
Out[50]:
CRIM      -0.388305
ZN         0.360445
INDUS     -0.483725
CHAS       0.175260
NOX       -0.427321
RM         0.695360
AGE       -0.376955
DIS        0.249929
RAD       -0.381626
TAX       -0.468536
PTRATIO   -0.507787
B          0.333461
LSTAT     -0.737663
PRICE      1.000000
Name: PRICE, dtype: float64
In [51]:
import matplotlib.pyplot as plt
In [52]:
import matplotlib.pyplot as plt
fig,axes = plt.subplots(figsize=(15,12))
sns.heatmap(correlation,square = True,annot = True)
Out[52]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fa6438b0490>
In [53]:
plt.figure(figsize = (20,5))
features = ['LSTAT','RM','PTRATIO']
for i, col in enumerate(features):
    plt.subplot(1, len(features) , i+1)
    x = data[col]
    y = data.PRICE
    plt.scatter(x, y, marker='o')
    plt.title("Variation in House prices")
    plt.xlabel(col)
    plt.ylabel('"House prices in $1000"')
In [54]:
X = data.iloc[:,:-1]
y= data.PRICE
In [55]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
In [56]:
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
In [57]:
from sklearn.linear_model import LinearRegression
In [58]:
regressor = LinearRegression()
In [59]:
regressor.fit(X_train,y_train)
Out[59]:
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)
In [60]:
y_pred = regressor.predict(X_test)
In [61]:
from sklearn.metrics import mean_squared_error
rmse = (np.sqrt(mean_squared_error(y_test, y_pred)))
print(rmse)
4.552364598463062
In [62]:
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_pred)
print(r2)
0.7261570836552476
In [63]:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
In [64]:
import keras
In [65]:
import tensorflow as tf
In [66]:
from keras.layers import Dense, Activation,Dropout
from keras.models import Sequential
In [67]:
model = Sequential()
model.add(Dense(128,activation = 'relu',input_dim =13))
model.add(Dense(64,activation = 'relu'))
In [68]:
model.add(Dense(32,activation = 'relu'))
model.add(Dense(16,activation = 'relu'))
model.add(Dense(1))
In [69]:
model.compile(optimizer = 'adam',loss ='mean_squared_error',metrics=['mae'])
In [70]:
!pip install ann_visualizer
!pip install graphviz
Requirement already satisfied: ann_visualizer in ./anaconda3/lib/python3.7/site-packages (2.5)
Requirement already satisfied: graphviz in ./anaconda3/lib/python3.7/site-packages (0.20.1)
In [71]:
from ann_visualizer.visualize import ann_viz;
In [72]:
ann_viz(model, title="DEMO ANN");
In [73]:
history = model.fit(X_train, y_train, epochs=100, validation_split=0.05)
Epoch 1/100
11/11 [==============================] - 0s 16ms/step - loss: 588.9966 - mae: 22.3586 - val_loss: 505.0154 - val_mae: 20.7000
Epoch 2/100
11/11 [==============================] - 0s 2ms/step - loss: 503.1987 - mae: 20.3758 - val_loss: 395.8688 - val_mae: 17.9121
Epoch 3/100
11/11 [==============================] - 0s 3ms/step - loss: 352.6916 - mae: 16.3034 - val_loss: 211.7909 - val_mae: 11.8841
Epoch 4/100
11/11 [==============================] - 0s 3ms/step - loss: 142.9678 - mae: 9.8284 - val_loss: 75.2975 - val_mae: 5.2240
Epoch 5/100
11/11 [==============================] - 0s 2ms/step - loss: 80.8291 - mae: 6.9493 - val_loss: 78.7917 - val_mae: 5.6193
Epoch 6/100
11/11 [==============================] - 0s 2ms/step - loss: 47.1580 - mae: 5.1156 - val_loss: 79.3199 - val_mae: 5.2359
Epoch 7/100
11/11 [==============================] - 0s 2ms/step - loss: 35.2439 - mae: 4.3305 - val_loss: 73.5953 - val_mae: 5.2775
Epoch 8/100
11/11 [==============================] - 0s 2ms/step - loss: 27.3548 - mae: 3.8664 - val_loss: 68.3758 - val_mae: 5.3364
Epoch 9/100
11/11 [==============================] - 0s 2ms/step - loss: 24.2702 - mae: 3.6847 - val_loss: 64.0256 - val_mae: 4.9732
Epoch 10/100
11/11 [==============================] - 0s 2ms/step - loss: 22.7235 - mae: 3.5662 - val_loss: 60.4270 - val_mae: 4.9706
Epoch 11/100
11/11 [==============================] - 0s 2ms/step - loss: 19.8159 - mae: 3.3015 - val_loss: 67.9518 - val_mae: 5.4029
Epoch 12/100
11/11 [==============================] - 0s 2ms/step - loss: 19.0358 - mae: 3.1300 - val_loss: 68.0323 - val_mae: 5.4126
Epoch 13/100
11/11 [==============================] - 0s 2ms/step - loss: 18.2876 - mae: 3.1180 - val_loss: 60.7587 - val_mae: 5.1968
Epoch 14/100
11/11 [==============================] - 0s 3ms/step - loss: 17.5243 - mae: 3.0516 - val_loss: 60.2654 - val_mae: 5.0317
Epoch 15/100
11/11 [==============================] - 0s 2ms/step - loss: 16.6951 - mae: 2.9952 - val_loss: 57.1266 - val_mae: 5.0708
Epoch 16/100
11/11 [==============================] - 0s 2ms/step - loss: 15.5733 - mae: 2.8761 - val_loss: 54.0615 - val_mae: 4.7828
Epoch 17/100
11/11 [==============================] - 0s 2ms/step - loss: 15.0501 - mae: 2.8110 - val_loss: 54.9937 - val_mae: 4.9215
Epoch 18/100
11/11 [==============================] - 0s 2ms/step - loss: 14.6684 - mae: 2.7916 - val_loss: 53.1280 - val_mae: 4.7423
Epoch 19/100
11/11 [==============================] - 0s 2ms/step - loss: 15.1002 - mae: 2.8293 - val_loss: 54.0508 - val_mae: 4.7981
Epoch 20/100
11/11 [==============================] - 0s 2ms/step - loss: 14.0888 - mae: 2.7627 - val_loss: 48.1808 - val_mae: 4.7486
Epoch 21/100
11/11 [==============================] - 0s 2ms/step - loss: 15.1644 - mae: 2.8874 - val_loss: 40.9429 - val_mae: 4.2956
Epoch 22/100
11/11 [==============================] - 0s 2ms/step - loss: 14.0077 - mae: 2.7819 - val_loss: 44.1783 - val_mae: 4.5083
Epoch 23/100
11/11 [==============================] - 0s 2ms/step - loss: 13.5004 - mae: 2.7089 - val_loss: 43.1713 - val_mae: 4.6612
Epoch 24/100
11/11 [==============================] - 0s 2ms/step - loss: 12.9231 - mae: 2.6468 - val_loss: 45.4527 - val_mae: 4.5690
Epoch 25/100
11/11 [==============================] - 0s 2ms/step - loss: 12.3909 - mae: 2.5834 - val_loss: 46.2506 - val_mae: 4.4459
Epoch 26/100
11/11 [==============================] - 0s 2ms/step - loss: 12.1494 - mae: 2.5576 - val_loss: 46.6642 - val_mae: 4.6466
Epoch 27/100
11/11 [==============================] - 0s 2ms/step - loss: 11.9093 - mae: 2.5301 - val_loss: 43.3212 - val_mae: 4.3452
Epoch 28/100
11/11 [==============================] - 0s 2ms/step - loss: 12.2498 - mae: 2.5500 - val_loss: 44.3994 - val_mae: 4.4654
Epoch 29/100
11/11 [==============================] - 0s 2ms/step - loss: 12.0247 - mae: 2.5283 - val_loss: 44.8300 - val_mae: 4.3825
Epoch 30/100
11/11 [==============================] - 0s 2ms/step - loss: 11.7942 - mae: 2.5174 - val_loss: 44.1683 - val_mae: 4.5169
Epoch 31/100
11/11 [==============================] - 0s 1ms/step - loss: 11.0529 - mae: 2.4259 - val_loss: 43.3114 - val_mae: 4.3390
Epoch 32/100
11/11 [==============================] - 0s 2ms/step - loss: 10.9677 - mae: 2.4153 - val_loss: 44.4978 - val_mae: 4.3837
Epoch 33/100
11/11 [==============================] - 0s 2ms/step - loss: 10.8758 - mae: 2.4129 - val_loss: 44.2788 - val_mae: 4.4263
Epoch 34/100
11/11 [==============================] - 0s 2ms/step - loss: 10.8577 - mae: 2.4441 - val_loss: 41.9999 - val_mae: 4.3054
Epoch 35/100
11/11 [==============================] - 0s 2ms/step - loss: 10.5178 - mae: 2.3562 - val_loss: 41.9991 - val_mae: 4.1659
Epoch 36/100
11/11 [==============================] - 0s 2ms/step - loss: 10.6169 - mae: 2.3866 - val_loss: 39.5672 - val_mae: 4.0079
Epoch 37/100
11/11 [==============================] - 0s 2ms/step - loss: 11.4979 - mae: 2.4651 - val_loss: 40.8920 - val_mae: 4.1922
Epoch 38/100
11/11 [==============================] - 0s 2ms/step - loss: 10.8410 - mae: 2.4705 - val_loss: 40.0690 - val_mae: 4.2955
Epoch 39/100
11/11 [==============================] - 0s 1ms/step - loss: 9.9876 - mae: 2.3050 - val_loss: 39.0875 - val_mae: 3.9805
Epoch 40/100
11/11 [==============================] - 0s 1ms/step - loss: 9.7693 - mae: 2.2989 - val_loss: 39.6807 - val_mae: 4.1103
Epoch 41/100
11/11 [==============================] - 0s 1ms/step - loss: 9.5836 - mae: 2.2715 - val_loss: 39.4062 - val_mae: 4.1294
Epoch 42/100
11/11 [==============================] - 0s 1ms/step - loss: 9.7452 - mae: 2.3173 - val_loss: 37.6943 - val_mae: 3.9130
Epoch 43/100
11/11 [==============================] - 0s 1ms/step - loss: 9.2352 - mae: 2.2424 - val_loss: 38.8331 - val_mae: 4.0123
Epoch 44/100
11/11 [==============================] - 0s 2ms/step - loss: 9.4252 - mae: 2.2688 - val_loss: 40.9791 - val_mae: 4.0407
Epoch 45/100
11/11 [==============================] - 0s 2ms/step - loss: 9.6111 - mae: 2.2942 - val_loss: 40.0124 - val_mae: 3.9865
Epoch 46/100
11/11 [==============================] - 0s 2ms/step - loss: 8.9400 - mae: 2.2003 - val_loss: 37.3683 - val_mae: 3.8683
Epoch 47/100
11/11 [==============================] - 0s 2ms/step - loss: 8.8415 - mae: 2.1767 - val_loss: 34.6394 - val_mae: 3.5656
Epoch 48/100
11/11 [==============================] - 0s 2ms/step - loss: 9.4848 - mae: 2.2637 - val_loss: 35.6680 - val_mae: 3.7830
Epoch 49/100
11/11 [==============================] - 0s 2ms/step - loss: 9.2310 - mae: 2.2827 - val_loss: 35.3253 - val_mae: 3.7068
Epoch 50/100
11/11 [==============================] - 0s 1ms/step - loss: 8.5259 - mae: 2.1734 - val_loss: 34.6664 - val_mae: 3.6494
Epoch 51/100
11/11 [==============================] - 0s 1ms/step - loss: 8.1249 - mae: 2.1106 - val_loss: 33.7729 - val_mae: 3.5660
Epoch 52/100
11/11 [==============================] - 0s 2ms/step - loss: 8.0780 - mae: 2.0893 - val_loss: 34.5980 - val_mae: 3.6082
Epoch 53/100
11/11 [==============================] - 0s 2ms/step - loss: 7.9569 - mae: 2.0927 - val_loss: 34.7714 - val_mae: 3.7046
Epoch 54/100
11/11 [==============================] - 0s 2ms/step - loss: 7.7629 - mae: 2.0482 - val_loss: 33.1320 - val_mae: 3.4947
Epoch 55/100
11/11 [==============================] - 0s 2ms/step - loss: 8.1798 - mae: 2.1530 - val_loss: 33.2804 - val_mae: 3.5686
Epoch 56/100
11/11 [==============================] - 0s 2ms/step - loss: 8.0405 - mae: 2.1330 - val_loss: 32.2855 - val_mae: 3.2958
Epoch 57/100
11/11 [==============================] - 0s 2ms/step - loss: 8.6169 - mae: 2.2046 - val_loss: 32.0414 - val_mae: 3.4645
Epoch 58/100
11/11 [==============================] - 0s 2ms/step - loss: 7.6722 - mae: 2.0512 - val_loss: 30.9463 - val_mae: 3.2524
Epoch 59/100
11/11 [==============================] - 0s 1ms/step - loss: 7.2758 - mae: 1.9867 - val_loss: 31.4647 - val_mae: 3.3469
Epoch 60/100
11/11 [==============================] - 0s 1ms/step - loss: 7.6196 - mae: 2.0441 - val_loss: 31.2370 - val_mae: 3.3954
Epoch 61/100
11/11 [==============================] - 0s 2ms/step - loss: 7.4474 - mae: 2.0459 - val_loss: 32.2481 - val_mae: 3.3958
Epoch 62/100
11/11 [==============================] - 0s 2ms/step - loss: 6.9228 - mae: 1.9432 - val_loss: 30.1315 - val_mae: 3.2536
Epoch 63/100
11/11 [==============================] - 0s 2ms/step - loss: 6.8909 - mae: 1.9569 - val_loss: 29.5405 - val_mae: 3.2686
Epoch 64/100
11/11 [==============================] - 0s 2ms/step - loss: 7.0226 - mae: 2.0118 - val_loss: 28.3730 - val_mae: 3.1316
Epoch 65/100
11/11 [==============================] - 0s 1ms/step - loss: 6.8270 - mae: 1.9657 - val_loss: 27.2403 - val_mae: 3.0346
Epoch 66/100
11/11 [==============================] - 0s 1ms/step - loss: 6.5118 - mae: 1.9323 - val_loss: 33.6313 - val_mae: 3.4374
Epoch 67/100
11/11 [==============================] - 0s 2ms/step - loss: 6.4845 - mae: 1.9376 - val_loss: 29.8851 - val_mae: 3.1718
Epoch 68/100
11/11 [==============================] - 0s 2ms/step - loss: 6.2974 - mae: 1.8682 - val_loss: 28.0243 - val_mae: 3.0581
Epoch 69/100
11/11 [==============================] - 0s 2ms/step - loss: 6.2119 - mae: 1.8671 - val_loss: 29.0065 - val_mae: 3.1586
Epoch 70/100
11/11 [==============================] - 0s 2ms/step - loss: 6.1092 - mae: 1.8501 - val_loss: 28.3537 - val_mae: 3.1420
Epoch 71/100
11/11 [==============================] - 0s 2ms/step - loss: 6.2947 - mae: 1.8765 - val_loss: 25.6232 - val_mae: 2.9389
Epoch 72/100
11/11 [==============================] - 0s 1ms/step - loss: 6.0007 - mae: 1.8480 - val_loss: 27.2902 - val_mae: 3.0094
Epoch 73/100
11/11 [==============================] - 0s 1ms/step - loss: 5.8848 - mae: 1.8396 - val_loss: 28.4162 - val_mae: 3.3734
Epoch 74/100
11/11 [==============================] - 0s 1ms/step - loss: 6.6968 - mae: 1.9836 - val_loss: 26.4591 - val_mae: 2.9026
Epoch 75/100
11/11 [==============================] - 0s 1ms/step - loss: 6.1004 - mae: 1.8847 - val_loss: 26.2995 - val_mae: 3.2986
Epoch 76/100
11/11 [==============================] - 0s 2ms/step - loss: 5.8558 - mae: 1.8384 - val_loss: 27.0291 - val_mae: 2.9866
Epoch 77/100
11/11 [==============================] - 0s 1ms/step - loss: 5.8204 - mae: 1.8112 - val_loss: 25.0681 - val_mae: 3.1244
Epoch 78/100
11/11 [==============================] - 0s 2ms/step - loss: 5.6689 - mae: 1.7721 - val_loss: 26.6613 - val_mae: 3.0229
Epoch 79/100
11/11 [==============================] - 0s 1ms/step - loss: 5.5372 - mae: 1.7724 - val_loss: 26.5757 - val_mae: 3.0614
Epoch 80/100
11/11 [==============================] - 0s 2ms/step - loss: 5.3118 - mae: 1.7247 - val_loss: 25.3102 - val_mae: 3.1341
Epoch 81/100
11/11 [==============================] - 0s 1ms/step - loss: 5.4955 - mae: 1.7805 - val_loss: 25.1586 - val_mae: 2.9578
Epoch 82/100
11/11 [==============================] - 0s 1ms/step - loss: 5.0795 - mae: 1.7059 - val_loss: 22.7737 - val_mae: 2.8577
Epoch 83/100
11/11 [==============================] - 0s 1ms/step - loss: 4.9729 - mae: 1.6815 - val_loss: 23.8417 - val_mae: 2.9197
Epoch 84/100
11/11 [==============================] - 0s 1ms/step - loss: 4.8782 - mae: 1.6668 - val_loss: 23.6548 - val_mae: 2.9560
Epoch 85/100
11/11 [==============================] - 0s 2ms/step - loss: 4.9144 - mae: 1.6635 - val_loss: 23.8469 - val_mae: 2.9262
Epoch 86/100
11/11 [==============================] - 0s 1ms/step - loss: 4.7997 - mae: 1.6342 - val_loss: 23.2423 - val_mae: 3.0261
Epoch 87/100
11/11 [==============================] - 0s 2ms/step - loss: 4.9769 - mae: 1.6650 - val_loss: 21.7400 - val_mae: 2.9177
Epoch 88/100
11/11 [==============================] - 0s 2ms/step - loss: 4.8546 - mae: 1.6753 - val_loss: 26.3081 - val_mae: 3.3287
Epoch 89/100
11/11 [==============================] - 0s 2ms/step - loss: 5.1427 - mae: 1.7348 - val_loss: 27.0291 - val_mae: 3.0953
Epoch 90/100
11/11 [==============================] - 0s 2ms/step - loss: 5.8400 - mae: 1.8558 - val_loss: 26.5126 - val_mae: 3.1839
Epoch 91/100
11/11 [==============================] - 0s 2ms/step - loss: 4.8655 - mae: 1.7070 - val_loss: 25.8536 - val_mae: 3.1019
Epoch 92/100
11/11 [==============================] - 0s 2ms/step - loss: 4.6584 - mae: 1.6318 - val_loss: 25.4041 - val_mae: 3.1715
Epoch 93/100
11/11 [==============================] - 0s 2ms/step - loss: 4.5776 - mae: 1.5942 - val_loss: 24.7996 - val_mae: 3.1165
Epoch 94/100
11/11 [==============================] - 0s 2ms/step - loss: 4.5221 - mae: 1.5888 - val_loss: 21.7229 - val_mae: 2.8775
Epoch 95/100
11/11 [==============================] - 0s 2ms/step - loss: 4.5736 - mae: 1.6049 - val_loss: 23.3633 - val_mae: 3.1403
Epoch 96/100
11/11 [==============================] - 0s 2ms/step - loss: 4.2228 - mae: 1.5556 - val_loss: 23.3777 - val_mae: 3.0416
Epoch 97/100
11/11 [==============================] - 0s 2ms/step - loss: 4.0991 - mae: 1.5057 - val_loss: 23.1926 - val_mae: 3.1704
Epoch 98/100
11/11 [==============================] - 0s 2ms/step - loss: 4.8759 - mae: 1.6981 - val_loss: 20.7348 - val_mae: 2.7833
Epoch 99/100
11/11 [==============================] - 0s 2ms/step - loss: 4.2400 - mae: 1.5129 - val_loss: 22.7491 - val_mae: 3.1046
Epoch 100/100
11/11 [==============================] - 0s 2ms/step - loss: 4.6875 - mae: 1.6419 - val_loss: 22.9158 - val_mae: 3.1007
In [74]:
from plotly.subplots import make_subplots
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(go.Scattergl(y=history.history['loss'],

name='Train'))

fig.add_trace(go.Scattergl(y=history.history['val_loss'],

name='Valid'))

fig.update_layout(height=500, width=700,
xaxis_title='Epoch',
yaxis_title='Loss')

fig.show()
In [75]:
fig = go.Figure()
fig.add_trace(go.Scattergl(y=history.history['mae'],

name='Train'))

fig.add_trace(go.Scattergl(y=history.history['val_mae'],

name='Valid'))

fig.update_layout(height=500, width=700,
xaxis_title='Epoch',
yaxis_title='Mean Absolute Error')

fig.show()
In [76]:
y_pred = model.predict(X_test)
mse_nn, mae_nn = model.evaluate(X_test, y_test)
print('Mean squared error on test data: ', mse_nn)
print('Mean absolute error on test data: ', mae_nn)
6/6 [==============================] - 0s 571us/step - loss: 11.0529 - mae: 2.2763
Mean squared error on test data:  11.052876472473145
Mean absolute error on test data:  2.2763397693634033
In [77]:
from sklearn.metrics import mean_absolute_error
lr_model = LinearRegression()
lr_model.fit(X_train, y_train)
Out[77]:
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)
In [78]:
y_pred_lr = lr_model.predict(X_test)
mse_lr = mean_squared_error(y_test, y_pred_lr)
mae_lr = mean_absolute_error(y_test, y_pred_lr)
print('Mean squared error on test data: ', mse_lr)
print('Mean absolute error on test data: ', mae_lr)
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_pred)
print(r2)
Mean squared error on test data:  20.724023437339753
Mean absolute error on test data:  3.148255754816832
0.8539496049654687
In [79]:
from sklearn.metrics import mean_squared_error
rmse = (np.sqrt(mean_squared_error(y_test, y_pred)))
print(rmse)
3.3245865973718143
In [80]:
import sklearn
new_data = sklearn.preprocessing.StandardScaler().fit_transform(([[0.1, 10.0,
5.0, 0, 0.4, 6.0, 50, 6.0, 1, 400, 20, 300, 10]]))
prediction = model.predict(new_data)
print("Predicted house price:", prediction)
Predicted house price: [[10.225188]]
In [ ]: